import tensorflow as tf
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import ReduceLROnPlateau
import seaborn as sns
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn import preprocessing
import pandas as pd
import matplotlib.pyplot as plt # MATPLOTLIB FOR PLOTTING
import numpy as np
import os
import cv2
from zipfile import ZipFile
# Extract the plant-seedlings-classification.zip archive using Python
zip_file_path = 'plant-seedlings-classification.zip'
with ZipFile(zip_file_path, 'r') as zip_ref:
    zip_ref.extractall()
ls
Volume in drive C is Windows-SSD
Volume Serial Number is 4EAB-7A9D
Directory of C:\Users\srile\Desktop\AIML\Project_CNN
18-09-2022 07:00 <DIR> .
10-09-2022 11:26 <DIR> ..
10-09-2022 11:25 <DIR> .ipynb_checkpoints
10-09-2022 11:49 <DIR> __MACOSX
11-09-2022 11:20 <DIR> 17flowers
17-09-2022 23:44 26,829,112 cnn_model.h5
17-09-2022 23:44 8,958,240 cnn_model_weights.h5
10-09-2022 10:56 314,332 CV+1-+Problem_Statement.pdf
10-09-2022 10:55 174,823 CV1+FAQ.pdf
10-09-2022 10:56 486,692 CV1-+Milestones.pdf
10-09-2022 11:49 <DIR> plant-seedlings-classification
10-09-2022 11:13 1,720,926,126 plant-seedlings-classification.zip
10-09-2022 10:56 35,126 Prediction.jpg
18-09-2022 07:00 3,332,768 Untitled.ipynb
8 File(s) 1,761,057,219 bytes
6 Dir(s) 74,727,727,104 bytes free
plant-seedlings-classification.zip is extracted as shown above.
Hint: create a DataFrame with 3 columns: name of the image, species/class of the image, and the actual image.
data_folder = 'plant-seedlings-classification'
seedlings_list = []
# walk the train directory: one sub-folder per species
for sub_folder in os.listdir(data_folder):
    if sub_folder == 'train':  # only the train folder holds labelled images
        print("Inside train folder..")
        for class_name in os.listdir(os.path.join(data_folder, sub_folder)):
            # read each image inside this class folder one by one
            if class_name != '.DS_Store':
                print("Extracting images from folder : {}".format(class_name))
                for img_name in os.listdir(os.path.join(data_folder, sub_folder, class_name)):
                    seedlings_dict = {}
                    seedlings_dict["img_name"] = img_name
                    seedlings_dict["img_class"] = class_name
                    img = cv2.imread(os.path.join(data_folder, sub_folder, class_name, img_name))
                    seedlings_dict["img"] = img
                    seedlings_list.append(seedlings_dict)
Inside train folder..
Extracting images from folder : Black-grass
Extracting images from folder : Charlock
Extracting images from folder : Cleavers
Extracting images from folder : Common Chickweed
Extracting images from folder : Common wheat
Extracting images from folder : Fat Hen
Extracting images from folder : Loose Silky-bent
Extracting images from folder : Maize
Extracting images from folder : Scentless Mayweed
Extracting images from folder : Shepherds Purse
Extracting images from folder : Small-flowered Cranesbill
Extracting images from folder : Sugar beet
#converting seedlings_list which is a list of dictionaries to pandas df
seedlings_df=pd.DataFrame(seedlings_list)
seedlings_df
| | img_name | img_class | img |
|---|---|---|---|
| 0 | 0050f38b3.png | Black-grass | [[[27, 50, 80], [18, 42, 71], [36, 57, 83], [4... |
| 1 | 0183fdf68.png | Black-grass | [[[37, 43, 55], [37, 43, 54], [40, 46, 57], [4... |
| 2 | 0260cffa8.png | Black-grass | [[[24, 32, 45], [21, 30, 44], [22, 30, 45], [2... |
| 3 | 05eedce4d.png | Black-grass | [[[51, 84, 108], [56, 89, 112], [54, 88, 110],... |
| 4 | 075d004bc.png | Black-grass | [[[165, 162, 162], [165, 161, 163], [160, 157,... |
| ... | ... | ... | ... |
| 4745 | fc293eacb.png | Sugar beet | [[[98, 94, 97], [93, 88, 94], [87, 82, 86], [8... |
| 4746 | fc441208c.png | Sugar beet | [[[35, 63, 92], [38, 67, 96], [34, 64, 94], [1... |
| 4747 | fed9406b2.png | Sugar beet | [[[44, 56, 72], [52, 63, 75], [53, 65, 75], [4... |
| 4748 | fef5e7066.png | Sugar beet | [[[144, 141, 145], [143, 139, 143], [146, 142,... |
| 4749 | ffa401155.png | Sugar beet | [[[71, 90, 99], [65, 81, 94], [68, 83, 97], [6... |
4750 rows × 3 columns
seedlings_df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 4750 entries, 0 to 4749
Data columns (total 3 columns):
 #   Column     Non-Null Count  Dtype
---  ------     --------------  -----
 0   img_name   4750 non-null   object
 1   img_class  4750 non-null   object
 2   img        4750 non-null   object
dtypes: object(3)
memory usage: 111.5+ KB
The pandas dataframe is created with 3 columns:
img_name - Name of image
img_class - Species/class/type of image
img - actual image
# Let's see the various classes available in the pandas dataframe along with the counts..
seedlings_df['img_class'].value_counts()
Loose Silky-bent             654
Common Chickweed             611
Scentless Mayweed            516
Small-flowered Cranesbill    496
Fat Hen                      475
Charlock                     390
Sugar beet                   385
Cleavers                     287
Black-grass                  263
Shepherds Purse              231
Common wheat                 221
Maize                        221
Name: img_class, dtype: int64
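The counts show a clear class imbalance: Loose Silky-bent has roughly three times as many images as Common wheat or Maize. A bar chart makes the skew easier to see at a glance; a minimal sketch using the seedlings_df built above:
# Sketch: bar chart of the class distribution (assumes seedlings_df from above)
class_counts = seedlings_df['img_class'].value_counts()
plt.figure(figsize=(10, 4))
sns.barplot(x=class_counts.index, y=class_counts.values)
plt.xticks(rotation=45, ha='right')
plt.ylabel('Number of images')
plt.title('Class distribution in the training data')
plt.tight_layout()
plt.show()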
Hint: if the input to the function is 5, it should display 5 random images along with their labels.
### Function to select n random images and display images along with its species name
def img_display(df, n):
    # visualize n random images from the dataset along with their labels
    x = df['img'].tolist()
    y = df['img_class'].tolist()
    rand = np.random.randint(0, len(x), n)  # n random indices into the image list
    print(rand)
    plt.figure(figsize=(20, 20))
    for i, j in enumerate(rand):
        plt.subplot(1, len(rand), i + 1)
        plt.imshow(x[j])
        plt.title("{}".format(y[j]))
        plt.axis('off')
    plt.show()
## calling the function on seedlings_df with n=10 to display 10 random images
img_display(seedlings_df,10)
[2846 3099 894 3458 4725 1577 3622 299 2727 4050]
# Creating x and y from the dataframe
X=seedlings_df[['img']]
y=seedlings_df[['img_class']]
y.describe()
| | img_class |
|---|---|
| count | 4750 |
| unique | 12 |
| top | Loose Silky-bent |
| freq | 654 |
y.head()
| | img_class |
|---|---|
| 0 | Black-grass |
| 1 | Black-grass |
| 2 | Black-grass |
| 3 | Black-grass |
| 4 | Black-grass |
X.head()
| | img |
|---|---|
| 0 | [[[27, 50, 80], [18, 42, 71], [36, 57, 83], [4... |
| 1 | [[[37, 43, 55], [37, 43, 54], [40, 46, 57], [4... |
| 2 | [[[24, 32, 45], [21, 30, 44], [22, 30, 45], [2... |
| 3 | [[[51, 84, 108], [56, 89, 112], [54, 88, 110],... |
| 4 | [[[165, 162, 162], [165, 161, 163], [160, 157,... |
X.shape
(4750, 1)
y.shape
(4750, 1)
## Encoding labels of the images
from tensorflow.keras.utils import to_categorical
classes = y.iloc[:,0].unique()
labels = preprocessing.LabelEncoder()
labels.fit(classes)
encodedlabels = labels.transform(y.iloc[:,0])
print("Encoded labels are : \n{}".format(encodedlabels))
print('\nClasses : \n{}'.format(str(labels.classes_)))
class_dict={l: i for (i, l) in enumerate(labels.classes_)}
print('\nClass dictionary : \n{}'.format(class_dict))
y=to_categorical(encodedlabels)
print("\ny after encoding : \n{}".format(y))
Encoded labels are :
[ 0 0 0 ... 11 11 11]
Classes :
['Black-grass' 'Charlock' 'Cleavers' 'Common Chickweed' 'Common wheat'
'Fat Hen' 'Loose Silky-bent' 'Maize' 'Scentless Mayweed'
'Shepherds Purse' 'Small-flowered Cranesbill' 'Sugar beet']
Class dictionary :
{'Black-grass': 0, 'Charlock': 1, 'Cleavers': 2, 'Common Chickweed': 3, 'Common wheat': 4, 'Fat Hen': 5, 'Loose Silky-bent': 6, 'Maize': 7, 'Scentless Mayweed': 8, 'Shepherds Purse': 9, 'Small-flowered Cranesbill': 10, 'Sugar beet': 11}
y after encoding :
[[1. 0. 0. ... 0. 0. 0.]
[1. 0. 0. ... 0. 0. 0.]
[1. 0. 0. ... 0. 0. 0.]
...
[0. 0. 0. ... 0. 0. 1.]
[0. 0. 0. ... 0. 0. 1.]
[0. 0. 0. ... 0. 0. 1.]]
# Let us first see the current dimensions of the images
img_shape = []
for image in X.img.values:
    img_shape.append(image.shape)
dimension = pd.DataFrame({'dim':img_shape})
dimension.head(10)
| | dim |
|---|---|
| 0 | (196, 196, 3) |
| 1 | (388, 388, 3) |
| 2 | (886, 886, 3) |
| 3 | (117, 117, 3) |
| 4 | (471, 471, 3) |
| 5 | (1074, 1074, 3) |
| 6 | (251, 251, 3) |
| 7 | (1899, 1900, 3) |
| 8 | (531, 531, 3) |
| 9 | (352, 352, 3) |
The images have varying dimensions.
# Let's unify the shape of all images to (80, 80, 3)
image_new = []
image_reshape = []
for image in X.img.values:
    image = cv2.resize(image, (80, 80))
    image_new.append(image)
    image_reshape.append(image.shape)
# let's create a dataframe to inspect the resized images and their dimensions
X_new = pd.DataFrame({'img':image_new, 'dim':image_reshape})
X_new.head()
| | img | dim |
|---|---|---|
| 0 | [[[22, 45, 76], [44, 65, 88], [63, 82, 102], [... | (80, 80, 3) |
| 1 | [[[44, 50, 57], [48, 51, 66], [52, 63, 80], [9... | (80, 80, 3) |
| 2 | [[[26, 35, 52], [25, 33, 54], [32, 45, 61], [3... | (80, 80, 3) |
| 3 | [[[50, 83, 107], [52, 86, 109], [53, 87, 110],... | (80, 80, 3) |
| 4 | [[[169, 168, 167], [165, 165, 163], [171, 169,... | (80, 80, 3) |
The images are reshaped to (80,80,3)
X = np.array(X_new['img'].tolist())
X.shape
(4750, 80, 80, 3)
print(X.shape, y.shape)
(4750, 80, 80, 3) (4750, 12)
# Normalizing the images by dividing it by 255
X = X.astype('float32')
X = X/255
X
array([[[[0.08627451, 0.1764706 , 0.29803923],
         [0.17254902, 0.25490198, 0.34509805],
         ...,
         [0.27058825, 0.24705882, 0.2627451 ]]],
       ...,
       [[[0.28235295, 0.3372549 , 0.3882353 ],
         ...,
         [0.16862746, 0.2627451 , 0.3372549 ]]]], dtype=float32)
y
array([[1., 0., 0., ..., 0., 0., 0.],
[1., 0., 0., ..., 0., 0., 0.],
[1., 0., 0., ..., 0., 0., 0.],
...,
[0., 0., 0., ..., 0., 0., 1.],
[0., 0., 0., ..., 0., 0., 1.],
[0., 0., 0., ..., 0., 0., 1.]], dtype=float32)
# Splitting the data into train and test by 80:20 proportion
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
print("Shape of X_train:",X_train.shape)
print("Shape of X_test:",X_test.shape)
print("Shape of y_train:",y_train.shape)
print("Shape of y_test:",y_test.shape)
Shape of X_train: (3800, 80, 80, 3)
Shape of X_test: (950, 80, 80, 3)
Shape of y_train: (3800, 12)
Shape of y_test: (950, 12)
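Since stratify=y was passed, every class should appear in the same 80:20 proportion in both splits. A quick sanity check (a sketch, assuming the one-hot y_train and y_test from above):
# Sketch: per-class counts; summing one-hot columns gives the count per class
print("Train class counts:", y_train.sum(axis=0))
print("Test class counts :", y_test.sum(axis=0))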
# no.of classes
num_class_train = y_train.shape[1]
print("The number of classes in train data:",num_class_train)
The number of classes in train data: 12
# no.of classes
num_class_test = y_test.shape[1]
print("The number of classes in test data:",num_class_test)
The number of classes in test data: 12
# create the CNN model for the plant seedlings data
num_classes = num_class_train
def create_cnn_model():
    # Initialising the CNN classifier
    cnn_model = Sequential()
    # Convolution layer with 32 kernels of 3x3 shape and ReLU activation
    cnn_model.add(Conv2D(32, (3, 3), input_shape=(80, 80, 3), activation='relu'))
    cnn_model.add(BatchNormalization(axis=3))
    # Max pooling layer of size 2x2
    cnn_model.add(MaxPooling2D(pool_size=(2, 2)))
    # Convolution layer with 64 kernels of 3x3 shape and ReLU activation
    cnn_model.add(Conv2D(64, (3, 3), activation='relu'))
    cnn_model.add(MaxPooling2D(pool_size=(2, 2)))
    cnn_model.add(BatchNormalization(axis=3))
    cnn_model.add(Dropout(0.25))
    # Convolution layer with 128 kernels of 3x3 shape and ReLU activation
    cnn_model.add(Conv2D(128, (3, 3), activation='relu'))
    cnn_model.add(MaxPooling2D(pool_size=(2, 2)))
    cnn_model.add(BatchNormalization(axis=3))
    cnn_model.add(Dropout(0.25))
    # Flatten before the fully connected layers
    cnn_model.add(Flatten())
    # Fully connected layer with 256 neurons
    cnn_model.add(Dense(units=256, activation='relu'))
    cnn_model.add(BatchNormalization())
    cnn_model.add(Dropout(0.5))
    # Fully connected layer with 128 neurons
    cnn_model.add(Dense(units=128, activation='relu'))
    cnn_model.add(BatchNormalization())
    cnn_model.add(Dropout(0.4))
    # Final output layer with 12 neurons (one per class) for categorical classification
    cnn_model.add(Dense(units=num_classes, activation='softmax'))
    # Compile the network
    optimizer = tf.keras.optimizers.Adam()
    cnn_model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    cnn_model.summary()
    return cnn_model
# Hyperparameters that worked well for this model
epoch = 50
batch_size = 128
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', patience=6, verbose=1, factor=0.5, min_lr=0.00001)
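ReduceLROnPlateau halves the learning rate whenever val_accuracy fails to improve for 6 epochs. An EarlyStopping callback could be paired with it to stop training altogether once validation accuracy plateaus; a minimal sketch (the patience value of 12 is an assumption, not something tuned here):
from tensorflow.keras.callbacks import EarlyStopping
# Sketch: stop if val_accuracy has not improved for 12 epochs (assumed patience)
# and roll back to the best weights observed
early_stopping = EarlyStopping(monitor='val_accuracy', patience=12, restore_best_weights=True, verbose=1)
# It would then be passed as callbacks=[learning_rate_reduction, early_stopping] to fit()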
# Train the CNN classifier model for Plant seedlings data
cnn_model = create_cnn_model()
psc_cnn_model = cnn_model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epoch, batch_size=batch_size,verbose=2,callbacks=[learning_rate_reduction])
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 78, 78, 32) 896
batch_normalization (BatchN (None, 78, 78, 32) 128
ormalization)
max_pooling2d (MaxPooling2D (None, 39, 39, 32) 0
)
conv2d_1 (Conv2D) (None, 37, 37, 64) 18496
max_pooling2d_1 (MaxPooling (None, 18, 18, 64) 0
2D)
batch_normalization_1 (Batc (None, 18, 18, 64) 256
hNormalization)
dropout (Dropout) (None, 18, 18, 64) 0
conv2d_2 (Conv2D) (None, 16, 16, 128) 73856
max_pooling2d_2 (MaxPooling (None, 8, 8, 128) 0
2D)
batch_normalization_2 (Batc (None, 8, 8, 128) 512
hNormalization)
dropout_1 (Dropout) (None, 8, 8, 128) 0
flatten (Flatten) (None, 8192) 0
dense (Dense) (None, 256) 2097408
batch_normalization_3 (Batc (None, 256) 1024
hNormalization)
dropout_2 (Dropout) (None, 256) 0
dense_1 (Dense) (None, 128) 32896
batch_normalization_4 (Batc (None, 128) 512
hNormalization)
dropout_3 (Dropout) (None, 128) 0
dense_2 (Dense) (None, 12) 1548
=================================================================
Total params: 2,227,532
Trainable params: 2,226,316
Non-trainable params: 1,216
_________________________________________________________________
Epoch 1/50
30/30 - 32s - loss: 2.4248 - accuracy: 0.2987 - val_loss: 2.6946 - val_accuracy: 0.0463 - lr: 0.0010 - 32s/epoch - 1s/step
Epoch 2/50
30/30 - 28s - loss: 1.4655 - accuracy: 0.5308 - val_loss: 3.1874 - val_accuracy: 0.0463 - lr: 0.0010 - 28s/epoch - 950ms/step
Epoch 3/50
30/30 - 28s - loss: 1.1171 - accuracy: 0.6300 - val_loss: 3.8787 - val_accuracy: 0.1368 - lr: 0.0010 - 28s/epoch - 946ms/step
Epoch 4/50
30/30 - 31s - loss: 0.9322 - accuracy: 0.6897 - val_loss: 4.2884 - val_accuracy: 0.1379 - lr: 0.0010 - 31s/epoch - 1s/step
Epoch 5/50
30/30 - 33s - loss: 0.7593 - accuracy: 0.7521 - val_loss: 4.6993 - val_accuracy: 0.1379 - lr: 0.0010 - 33s/epoch - 1s/step
Epoch 6/50
30/30 - 28s - loss: 0.6474 - accuracy: 0.7842 - val_loss: 6.1117 - val_accuracy: 0.1379 - lr: 0.0010 - 28s/epoch - 933ms/step
Epoch 7/50
30/30 - 27s - loss: 0.5440 - accuracy: 0.8121 - val_loss: 6.7257 - val_accuracy: 0.1379 - lr: 0.0010 - 27s/epoch - 912ms/step
Epoch 8/50
30/30 - 27s - loss: 0.4866 - accuracy: 0.8358 - val_loss: 9.6443 - val_accuracy: 0.1379 - lr: 0.0010 - 27s/epoch - 908ms/step
Epoch 9/50
30/30 - 31s - loss: 0.4014 - accuracy: 0.8658 - val_loss: 10.4723 - val_accuracy: 0.1379 - lr: 0.0010 - 31s/epoch - 1s/step
Epoch 10/50
Epoch 10: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
30/30 - 33s - loss: 0.3631 - accuracy: 0.8774 - val_loss: 10.9873 - val_accuracy: 0.1379 - lr: 0.0010 - 33s/epoch - 1s/step
Epoch 11/50
30/30 - 27s - loss: 0.3126 - accuracy: 0.8995 - val_loss: 9.1759 - val_accuracy: 0.1379 - lr: 5.0000e-04 - 27s/epoch - 914ms/step
Epoch 12/50
30/30 - 27s - loss: 0.2845 - accuracy: 0.9168 - val_loss: 10.3792 - val_accuracy: 0.1379 - lr: 5.0000e-04 - 27s/epoch - 909ms/step
Epoch 13/50
30/30 - 28s - loss: 0.2343 - accuracy: 0.9263 - val_loss: 9.2652 - val_accuracy: 0.1379 - lr: 5.0000e-04 - 28s/epoch - 940ms/step
Epoch 14/50
30/30 - 30s - loss: 0.2135 - accuracy: 0.9339 - val_loss: 8.1781 - val_accuracy: 0.1421 - lr: 5.0000e-04 - 30s/epoch - 1s/step
Epoch 15/50
30/30 - 29s - loss: 0.2096 - accuracy: 0.9342 - val_loss: 7.4493 - val_accuracy: 0.1442 - lr: 5.0000e-04 - 29s/epoch - 982ms/step
Epoch 16/50
30/30 - 30s - loss: 0.1963 - accuracy: 0.9387 - val_loss: 6.1388 - val_accuracy: 0.1516 - lr: 5.0000e-04 - 30s/epoch - 1s/step
Epoch 17/50
30/30 - 28s - loss: 0.1773 - accuracy: 0.9437 - val_loss: 5.6623 - val_accuracy: 0.2126 - lr: 5.0000e-04 - 28s/epoch - 927ms/step
Epoch 18/50
30/30 - 38s - loss: 0.1634 - accuracy: 0.9471 - val_loss: 4.3661 - val_accuracy: 0.3042 - lr: 5.0000e-04 - 38s/epoch - 1s/step
Epoch 19/50
30/30 - 38s - loss: 0.1505 - accuracy: 0.9563 - val_loss: 3.6646 - val_accuracy: 0.3137 - lr: 5.0000e-04 - 38s/epoch - 1s/step
Epoch 20/50
30/30 - 33s - loss: 0.1335 - accuracy: 0.9616 - val_loss: 3.7401 - val_accuracy: 0.3305 - lr: 5.0000e-04 - 33s/epoch - 1s/step
Epoch 21/50
30/30 - 31s - loss: 0.1301 - accuracy: 0.9603 - val_loss: 2.8989 - val_accuracy: 0.3916 - lr: 5.0000e-04 - 31s/epoch - 1s/step
Epoch 22/50
30/30 - 32s - loss: 0.1016 - accuracy: 0.9747 - val_loss: 1.6926 - val_accuracy: 0.5421 - lr: 5.0000e-04 - 32s/epoch - 1s/step
Epoch 23/50
30/30 - 33s - loss: 0.1152 - accuracy: 0.9687 - val_loss: 2.1364 - val_accuracy: 0.5126 - lr: 5.0000e-04 - 33s/epoch - 1s/step
Epoch 24/50
30/30 - 31s - loss: 0.1038 - accuracy: 0.9700 - val_loss: 1.0530 - val_accuracy: 0.7095 - lr: 5.0000e-04 - 31s/epoch - 1s/step
Epoch 25/50
30/30 - 35s - loss: 0.0883 - accuracy: 0.9811 - val_loss: 1.4221 - val_accuracy: 0.6337 - lr: 5.0000e-04 - 35s/epoch - 1s/step
Epoch 26/50
30/30 - 32s - loss: 0.0901 - accuracy: 0.9768 - val_loss: 1.2080 - val_accuracy: 0.6874 - lr: 5.0000e-04 - 32s/epoch - 1s/step
Epoch 27/50
30/30 - 29s - loss: 0.0874 - accuracy: 0.9753 - val_loss: 1.5978 - val_accuracy: 0.6263 - lr: 5.0000e-04 - 29s/epoch - 979ms/step
Epoch 28/50
30/30 - 30s - loss: 0.0741 - accuracy: 0.9803 - val_loss: 0.6296 - val_accuracy: 0.8242 - lr: 5.0000e-04 - 30s/epoch - 1s/step
Epoch 29/50
30/30 - 28s - loss: 0.0727 - accuracy: 0.9805 - val_loss: 0.6842 - val_accuracy: 0.8147 - lr: 5.0000e-04 - 28s/epoch - 925ms/step
Epoch 30/50
30/30 - 27s - loss: 0.0670 - accuracy: 0.9834 - val_loss: 0.6284 - val_accuracy: 0.8232 - lr: 5.0000e-04 - 27s/epoch - 905ms/step
Epoch 31/50
30/30 - 27s - loss: 0.0583 - accuracy: 0.9858 - val_loss: 0.6564 - val_accuracy: 0.8358 - lr: 5.0000e-04 - 27s/epoch - 907ms/step
Epoch 32/50
30/30 - 27s - loss: 0.0573 - accuracy: 0.9853 - val_loss: 0.7970 - val_accuracy: 0.7926 - lr: 5.0000e-04 - 27s/epoch - 908ms/step
Epoch 33/50
30/30 - 28s - loss: 0.0583 - accuracy: 0.9855 - val_loss: 0.9044 - val_accuracy: 0.7558 - lr: 5.0000e-04 - 28s/epoch - 927ms/step
Epoch 34/50
30/30 - 28s - loss: 0.0578 - accuracy: 0.9847 - val_loss: 0.5659 - val_accuracy: 0.8389 - lr: 5.0000e-04 - 28s/epoch - 924ms/step
Epoch 35/50
30/30 - 27s - loss: 0.0457 - accuracy: 0.9897 - val_loss: 0.5158 - val_accuracy: 0.8505 - lr: 5.0000e-04 - 27s/epoch - 911ms/step
Epoch 36/50
30/30 - 27s - loss: 0.0472 - accuracy: 0.9876 - val_loss: 0.5833 - val_accuracy: 0.8411 - lr: 5.0000e-04 - 27s/epoch - 914ms/step
Epoch 37/50
30/30 - 28s - loss: 0.0529 - accuracy: 0.9858 - val_loss: 0.7941 - val_accuracy: 0.8126 - lr: 5.0000e-04 - 28s/epoch - 922ms/step
Epoch 38/50
30/30 - 28s - loss: 0.0487 - accuracy: 0.9879 - val_loss: 0.6215 - val_accuracy: 0.8368 - lr: 5.0000e-04 - 28s/epoch - 941ms/step
Epoch 39/50
30/30 - 30s - loss: 0.0443 - accuracy: 0.9882 - val_loss: 0.8723 - val_accuracy: 0.7842 - lr: 5.0000e-04 - 30s/epoch - 996ms/step
Epoch 40/50
30/30 - 29s - loss: 0.0439 - accuracy: 0.9895 - val_loss: 0.6470 - val_accuracy: 0.8200 - lr: 5.0000e-04 - 29s/epoch - 982ms/step
Epoch 41/50
Epoch 41: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
30/30 - 34s - loss: 0.0427 - accuracy: 0.9887 - val_loss: 0.7133 - val_accuracy: 0.8379 - lr: 5.0000e-04 - 34s/epoch - 1s/step
Epoch 42/50
30/30 - 31s - loss: 0.0347 - accuracy: 0.9918 - val_loss: 0.5251 - val_accuracy: 0.8558 - lr: 2.5000e-04 - 31s/epoch - 1s/step
Epoch 43/50
30/30 - 34s - loss: 0.0364 - accuracy: 0.9900 - val_loss: 0.6979 - val_accuracy: 0.8316 - lr: 2.5000e-04 - 34s/epoch - 1s/step
Epoch 44/50
30/30 - 36s - loss: 0.0287 - accuracy: 0.9947 - val_loss: 0.6235 - val_accuracy: 0.8463 - lr: 2.5000e-04 - 36s/epoch - 1s/step
Epoch 45/50
30/30 - 34s - loss: 0.0247 - accuracy: 0.9961 - val_loss: 0.6323 - val_accuracy: 0.8537 - lr: 2.5000e-04 - 34s/epoch - 1s/step
Epoch 46/50
30/30 - 34s - loss: 0.0283 - accuracy: 0.9926 - val_loss: 0.7245 - val_accuracy: 0.8274 - lr: 2.5000e-04 - 34s/epoch - 1s/step
Epoch 47/50
30/30 - 33s - loss: 0.0300 - accuracy: 0.9929 - val_loss: 0.5087 - val_accuracy: 0.8642 - lr: 2.5000e-04 - 33s/epoch - 1s/step
Epoch 48/50
30/30 - 32s - loss: 0.0279 - accuracy: 0.9942 - val_loss: 0.5195 - val_accuracy: 0.8484 - lr: 2.5000e-04 - 32s/epoch - 1s/step
Epoch 49/50
30/30 - 32s - loss: 0.0229 - accuracy: 0.9958 - val_loss: 1.0488 - val_accuracy: 0.7779 - lr: 2.5000e-04 - 32s/epoch - 1s/step
Epoch 50/50
30/30 - 33s - loss: 0.0237 - accuracy: 0.9950 - val_loss: 0.5416 - val_accuracy: 0.8568 - lr: 2.5000e-04 - 33s/epoch - 1s/step
psc_cnn_model_result = cnn_model.evaluate(X_test, y_test)
30/30 [==============================] - 2s 77ms/step - loss: 0.5416 - accuracy: 0.8568
print('Test loss: ', psc_cnn_model_result[0])
print('Test accuracy: ', psc_cnn_model_result[1])
Test loss:  0.5416200757026672
Test accuracy:  0.8568421006202698
The model reached 99.50% train accuracy but 85.68% test accuracy, with a train loss of about 0.02 against a test loss of 0.54, so there is noticeable overfitting. Even so, the model looks promising for plant seedlings classification.
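Overall accuracy also hides per-class behavior; visually similar species (e.g. Black-grass and Loose Silky-bent) are likely confused more often than others. A per-class report would make this visible; a minimal sketch, reusing the trained cnn_model and the fitted LabelEncoder labels from above:
# Sketch: per-class precision/recall for the CNN on the held-out test set
y_pred_proba = cnn_model.predict(X_test)
y_pred_cls = np.argmax(y_pred_proba, axis=1)  # predicted class indices
y_true_cls = np.argmax(y_test, axis=1)        # true class indices from one-hot labels
print(classification_report(y_true_cls, y_pred_cls, target_names=labels.classes_))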
loss_train = psc_cnn_model.history['loss']
loss_val = psc_cnn_model.history['val_loss']
epochs = range(1,epoch+1)
plt.plot(epochs, loss_train, 'g', label='Training loss')
plt.plot(epochs, loss_val, 'b', label='validation loss')
plt.title('Training and Validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
Acc_train = psc_cnn_model.history['accuracy']
Acc_val = psc_cnn_model.history['val_accuracy']
epochs = range(1,epoch+1)
plt.plot(epochs, Acc_train, 'g', label='Training accuracy')
plt.plot(epochs, Acc_val, 'b', label='validation accuracy')
plt.title('Training and Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()
# Saving the model for future use
cnn_model.save('./cnn_model.h5')
cnn_model.save_weights('./cnn_model_weights.h5')
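load_model was imported at the top but never exercised; reloading the saved file is a cheap check that serialization round-trips. A minimal sketch:
# Sketch: reload the saved model and confirm it evaluates the same as before
restored_model = load_model('./cnn_model.h5')
restored_loss, restored_acc = restored_model.evaluate(X_test, y_test, verbose=0)
print('Restored model - test loss: {:.4f}, test accuracy: {:.4f}'.format(restored_loss, restored_acc))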
# Selecting a random image
test_image = cv2.imread('./plant-seedlings-classification/train/Maize/1b1ab91eb.png')
# Resize the image to 80*80 shape to be compatible with the model
test_image = cv2.resize(test_image,(80,80))
print("Shape of the test_image : {}".format(test_image.shape))
# Add a batch dimension so the shape matches the Keras input (1, 80, 80, 3)
test_image = np.expand_dims(test_image, axis=0)
# Normalize the image the same way as the training data
test_image = test_image / 255.0
print('After expand_dims: {}'.format(test_image.shape))
# Re-compile with eager execution enabled; only predict() is called after this,
# so the loss/optimizer defaults that compile() assigns do not affect the result
cnn_model.compile(run_eagerly=True)
#Predict result of the test image
test_result = cnn_model.predict(test_image)
Y_pred_classes = np.argmax(test_result,axis = 1)
print("Y_pred_classes :{}".format(Y_pred_classes))
Y_pred_df = labels.inverse_transform(Y_pred_classes)
results_data = pd.DataFrame(data={'species':Y_pred_df})
results_data
Shape of the test_image : (80, 80, 3)
After expand_dims: (1, 80, 80, 3)
1/1 [==============================] - 0s 49ms/step
Y_pred_classes :[7]
| | species |
|---|---|
| 0 | Maize |
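The steps above (read, resize, add a batch axis, normalize, predict, decode) can be folded into a small helper so any image file is classified the same way; a sketch, assuming cnn_model and the labels encoder are still in scope (the helper name is ours):
def predict_species(img_path, model=cnn_model, encoder=labels):
    # Sketch: end-to-end prediction for one image file (hypothetical helper)
    img = cv2.imread(img_path)               # BGR image, same as during training
    img = cv2.resize(img, (80, 80)) / 255.0  # match the training size and scaling
    pred = model.predict(np.expand_dims(img, axis=0))
    return encoder.inverse_transform([np.argmax(pred)])[0]

print(predict_species('./plant-seedlings-classification/train/Maize/1b1ab91eb.png'))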
import warnings
warnings.filterwarnings('ignore')
from tflearn.datasets import oxflower17
x, y = oxflower17.load_data()
WARNING:tensorflow:From C:\Users\srile\anaconda3\lib\site-packages\tensorflow\python\compat\v2_compat.py:107: disable_resource_variables (from tensorflow.python.ops.variable_scope) is deprecated and will be removed in a future version.
Instructions for updating:
non-resource variables are not supported in the long term
curses is not supported on this machine (please install/reinstall curses for an optimal experience)
print("Shape of x : {}".format(x.shape))
print("Shape of y : {}".format(y.shape))
Shape of x : (1360, 224, 224, 3)
Shape of y : (1360,)
The dataset is loaded and split into x and y
print("Shape of x : {}".format(x.shape))
print("Shape of y : {}".format(y.shape))
Shape of x : (1360, 224, 224, 3) Shape of y : (1360,)
# Let us first see the current dimensions of the images
img_shape = []
for image in x:
    img_shape.append(image.shape)
dimension = pd.DataFrame({'dim':img_shape})
dimension['dim'].unique()
array([(224, 224, 3)], dtype=object)
dimension['dim'].value_counts()
(224, 224, 3)    1360
Name: dim, dtype: int64
print("Number of images in the dataset : {}".format(x.shape[0]))
print("Shape of the images are width : {}, height : {},channels : {}".format(x.shape[1],x.shape[2],x.shape[3]))
Number of images in the dataset : 1360 Shape of the images are width : 224, height : 224,channels : 3
y_df = pd.DataFrame(y, columns = ['classes'])
print("Count of each class:")
y_df['classes'].value_counts().sort_index()
Count of each class:
0     80
1     80
2     80
3     80
4     80
5     80
6     80
7     80
8     80
9     80
10    80
11    80
12    80
13    80
14    80
15    80
16    80
Name: classes, dtype: int64
def image_display(x, y, n):
    # display n random flower images along with their class labels
    rand = np.random.randint(0, len(x), n)  # n random indices into the dataset
    print(rand)
    plt.figure(figsize=(20, 20))
    for i, j in enumerate(rand):
        plt.subplot(1, len(rand), i + 1)
        plt.imshow(x[j])
        plt.title("{}".format(y[j]))
        plt.axis('off')
    plt.show()
# calling the function
image_display(x,y,5)
[ 348 475 441 845 1283]
# selecting a random number
rand = np.random.randint(0, len(x))
print("Random int :",rand)
rand_img=x[rand]
plt.imshow(rand_img)
plt.title("{}".format(y[rand]))
Random int : 386
Text(0.5, 1.0, '8')
# Another way of selecting random image
import random
# Original Image
plt.figure(figsize=(8,8))
orig_img = random.choice(x)
plt.imshow(orig_img)
<matplotlib.image.AxesImage at 0x15a2f38aeb0>
# Gray Image
gray_image = cv2.cvtColor(orig_img, cv2.COLOR_RGB2GRAY)
img_title = ['original_image', 'gray_image']
plt.figure(figsize=(10, 10))
for i, img in enumerate([orig_img, gray_image]):
    plt.subplot(1, 2, i + 1)
    plt.title(img_title[i])
    plt.imshow(img, cmap='gray')
# Sharp Image
sharp_filter = np.array([[ 0, -1,  0],
                         [-1,  5, -1],
                         [ 0, -1,  0]])
sharp_img = cv2.filter2D(orig_img, kernel=sharp_filter, ddepth=-1)
img_title = ['original_image', 'sharp_img']
plt.figure(figsize=(10, 10))
for i, img in enumerate([orig_img, sharp_img]):
    plt.subplot(1, 2, i + 1)
    plt.title(img_title[i])
    plt.imshow(img, cmap='gray')
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
# Blur Image
blur_img = cv2.GaussianBlur(orig_img, ksize=(5, 5), sigmaX=0, sigmaY=0)
img_title = ['original_image', 'blur_img']
plt.figure(figsize=(10, 10))
for i, img in enumerate([orig_img, blur_img]):
    plt.subplot(1, 2, i + 1)
    plt.title(img_title[i])
    plt.imshow(img, cmap='gray')
img_title = ['original_image', 'gray_image', 'blurred_image', 'sharp_image']
plt.figure(figsize=(20, 20))
for i, img in enumerate([orig_img, gray_image, blur_img, sharp_img]):
    plt.subplot(1, 4, i + 1)
    plt.title(img_title[i])
    plt.imshow(img, cmap='gray')
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
The differences among the original, grayscale, blurred, and sharpened images are clearly visible.
X = np.array(x)
y = np.array(y)
print(X.shape, y.shape)
(1360, 224, 224, 3) (1360,)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
print("Shape of X_train : {}".format(X_train.shape))
print("Shape of X_test : {}".format(X_test.shape))
print("Shape of y_train : {}".format(y_train.shape))
print("Shape of y_test : {}".format(y_test.shape))
Shape of X_train : (1088, 224, 224, 3)
Shape of X_test : (272, 224, 224, 3)
Shape of y_train : (1088,)
Shape of y_test : (272,)
# Reshaping the train and test data as per the Supervised Learning model requirement
X_train_sl = X_train.astype('float32')
X_train_sl = X_train_sl.reshape(X_train_sl.shape[0], 150528) # 224X224X3 = 150528
X_train_sl = X_train_sl/255
X_test_sl = X_test.astype('float32')
X_test_sl = X_test_sl.reshape(X_test_sl.shape[0], 150528) # 224X224X3 = 150528
X_test_sl = X_test_sl/255
print('Shape of X_train_sl : {}'.format(X_train_sl.shape))
print('Shape of X_test_sl : {}'.format(X_test_sl.shape))
print('Shape of y_train : {}'.format(y_train.shape))
print('Shape of y_test : {}'.format(y_test.shape))
Shape of X_train_sl : (1088, 150528)
Shape of X_test_sl : (272, 150528)
Shape of y_train : (1088,)
Shape of y_test : (272,)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
lr_model = LogisticRegression()
lr_model.fit(X_train_sl, y_train)
y_pred_train=lr_model.predict(X_train_sl)
lr_model_acc_train = accuracy_score(y_true=y_train, y_pred=y_pred_train)
print("Train Accuracy : ", lr_model_acc_train)
y_pred_test = lr_model.predict(X_test_sl)
lr_model_acc_test = accuracy_score(y_true=y_test, y_pred=y_pred_test)
print("Test Accuracy : ", lr_model_acc_test)
Train Accuracy :  0.5110294117647058
Test Accuracy :  0.3713235294117647
print('Accuracy on Training data using logistic regression model is {}'.format(lr_model.score(X_train_sl, y_train)))
print('Accuracy on Test data using logistic regression model is {}'.format(lr_model.score(X_test_sl, y_test)))
Accuracy on Training data using logistic regression model is 0.5110294117647058
Accuracy on Test data using logistic regression model is 0.3713235294117647
# there are 17 classes (0-16), so the label range must cover all of them
cm = metrics.confusion_matrix(y_test, y_pred_test, labels=[i for i in range(17)])
df_cm = pd.DataFrame(cm, index=[i for i in range(17)],
                     columns=[i for i in range(17)])
plt.figure(figsize = (7,5))
sns.heatmap(df_cm, annot=True,fmt='g')
<AxesSubplot:>
print("classification Matrix for logistic regression model :\n",classification_report(y_test,y_pred_test))
classification Matrix for logistic regression model :
precision recall f1-score support
0 0.31 0.25 0.28 16
1 1.00 0.06 0.12 16
2 0.43 0.62 0.51 16
3 0.57 0.25 0.35 16
4 0.27 0.56 0.37 16
5 1.00 0.12 0.22 16
6 0.30 0.19 0.23 16
7 0.50 0.38 0.43 16
8 0.64 0.56 0.60 16
9 0.21 0.56 0.31 16
10 0.25 0.31 0.28 16
11 0.27 0.44 0.33 16
12 0.41 0.56 0.47 16
13 0.17 0.06 0.09 16
14 0.38 0.19 0.25 16
15 0.33 0.19 0.24 16
16 0.67 1.00 0.80 16
accuracy 0.37 272
macro avg 0.45 0.37 0.35 272
weighted avg 0.45 0.37 0.35 272
Precision: out of all samples predicted as a given class, the fraction that actually belong to it.
Recall (sensitivity or TPR): out of all actual samples of a class, the fraction we identified correctly.
Test accuracy with the logistic regression model is about 37%.
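Both quantities can be read straight off the confusion matrix computed above: for class k, precision is the diagonal entry divided by its column sum, and recall is the diagonal entry divided by its row sum. A sketch for a single class (k=2 is an arbitrary choice):
# Sketch: precision and recall for one class, derived from the confusion matrix cm
k = 2                              # class index to inspect (arbitrary)
tp = cm[k, k]                      # true positives sit on the diagonal
precision_k = tp / cm[:, k].sum()  # column sum = everything predicted as class k
recall_k = tp / cm[k, :].sum()     # row sum = everything actually of class k
print("class {}: precision={:.2f}, recall={:.2f}".format(k, precision_k, recall_k))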
## Encoding labels of the images
from tensorflow.keras.utils import to_categorical
classes = y_df.iloc[:,0].unique()
labels = preprocessing.LabelEncoder()
labels.fit(classes)
encodedlabels = labels.transform(y_df.iloc[:,0])
print("Encoded labels are : \n{}".format(encodedlabels))
print('\nClasses : \n{}'.format(str(labels.classes_)))
class_dict={l: i for (i, l) in enumerate(labels.classes_)}
print('\nClass dictionary : \n{}'.format(class_dict))
y=to_categorical(encodedlabels)
print("\ny after encoding : \n{}".format(y))
Encoded labels are :
[11 4 15 ... 5 12 1]
Classes :
[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
Class dictionary :
{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16}
y after encoding :
[[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 1. 0.]
...
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
[0. 1. 0. ... 0. 0. 0.]]
X = np.array(x)
y = np.array(y)
print("Shape of X : {}".format(X.shape))
print("Shape of y : {}".format(y.shape))
Shape of X : (1360, 224, 224, 3)
Shape of y : (1360, 17)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=62, stratify=y)
# Reshaping the train and test data as per NN model requirement
X_train_nn = X_train.astype('float32')
X_train_nn = X_train_nn.reshape(X_train_nn.shape[0], 150528) # 224X224X3 = 150528
X_train_nn = X_train_nn/255
X_test_nn = X_test.astype('float32')
X_test_nn = X_test_nn.reshape(X_test_nn.shape[0], 150528) # 224X224X3 = 150528
X_test_nn = X_test_nn/255
print("Shape of X_train_nn : {}".format(X_train_nn.shape))
print("Shape of y_train : {}".format(y_train.shape))
print("Shape of X_test_nn : {}".format(X_test_nn.shape))
print("Shape of y_test : {}".format(y_test.shape))
Shape of X_train_nn : (1088, 150528)
Shape of y_train : (1088, 17)
Shape of X_test_nn : (272, 150528)
Shape of y_test : (272, 17)
# no.of classes
num_class_train = y_train.shape[1]
print("The number of classes in train data:",num_class_train)
The number of classes in train data: 17
# create the nn classifier model for oxflower17 data
num_classes=num_class_train
def create_oxf_nn_model(lr, batch_size):
    np.random.seed(100)
    oxf_nn_model = Sequential()
    # The input layer
    oxf_nn_model.add(Dense(512, input_dim=X_train_nn.shape[1], activation='relu'))
    # The hidden layers
    oxf_nn_model.add(Dense(256, activation='relu'))
    oxf_nn_model.add(Dropout(rate=0.2))
    oxf_nn_model.add(Dense(128, activation='relu'))
    oxf_nn_model.add(Dropout(rate=0.2))
    oxf_nn_model.add(Dense(64, activation='relu'))
    oxf_nn_model.add(Dense(64, activation='relu'))
    oxf_nn_model.add(Dropout(rate=0.2))
    oxf_nn_model.add(Dense(32, activation='relu'))
    # The output layer
    oxf_nn_model.add(Dense(num_classes, activation='softmax'))
    # Compile the network
    optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
    oxf_nn_model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    oxf_nn_model.summary()
    return oxf_nn_model
# Hyperparameters that worked well for this model
lr=0.0001
batch_size=100
EPOCH=100
# Train the NN classifier model for oxflower17 data
oxf_nn_model = create_oxf_nn_model(lr,batch_size)
oxf_nn_model_fitted = oxf_nn_model.fit(X_train_nn, y_train, validation_data=(X_test_nn, y_test), epochs=EPOCH, batch_size=batch_size, verbose=2)
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_3 (Dense) (None, 512) 77070848
dense_4 (Dense) (None, 256) 131328
dropout_4 (Dropout) (None, 256) 0
dense_5 (Dense) (None, 128) 32896
dropout_5 (Dropout) (None, 128) 0
dense_6 (Dense) (None, 64) 8256
dense_7 (Dense) (None, 64) 4160
dropout_6 (Dropout) (None, 64) 0
dense_8 (Dense) (None, 32) 2080
dense_9 (Dense) (None, 17) 561
=================================================================
Total params: 77,250,129
Trainable params: 77,250,129
Non-trainable params: 0
_________________________________________________________________
Train on 1088 samples, validate on 272 samples
Epoch 1/100
1088/1088 - 6s - loss: 2.8327 - acc: 0.0561 - val_loss: 2.8319 - val_acc: 0.0588 - 6s/epoch - 6ms/sample
Epoch 2/100
1088/1088 - 5s - loss: 2.8321 - acc: 0.0680 - val_loss: 2.8303 - val_acc: 0.0184 - 5s/epoch - 5ms/sample
Epoch 3/100
1088/1088 - 5s - loss: 2.8303 - acc: 0.0570 - val_loss: 2.8277 - val_acc: 0.0221 - 5s/epoch - 5ms/sample
Epoch 4/100
1088/1088 - 5s - loss: 2.8264 - acc: 0.0487 - val_loss: 2.8237 - val_acc: 0.0257 - 5s/epoch - 5ms/sample
Epoch 5/100
1088/1088 - 6s - loss: 2.8219 - acc: 0.0542 - val_loss: 2.8162 - val_acc: 0.0515 - 6s/epoch - 5ms/sample
Epoch 6/100
1088/1088 - 6s - loss: 2.8157 - acc: 0.0634 - val_loss: 2.8086 - val_acc: 0.0625 - 6s/epoch - 6ms/sample
Epoch 7/100
1088/1088 - 6s - loss: 2.8094 - acc: 0.0717 - val_loss: 2.7953 - val_acc: 0.0662 - 6s/epoch - 5ms/sample
Epoch 8/100
1088/1088 - 6s - loss: 2.7974 - acc: 0.0735 - val_loss: 2.7787 - val_acc: 0.0735 - 6s/epoch - 6ms/sample
Epoch 9/100
1088/1088 - 6s - loss: 2.7784 - acc: 0.0809 - val_loss: 2.7583 - val_acc: 0.0662 - 6s/epoch - 6ms/sample
Epoch 10/100
1088/1088 - 6s - loss: 2.7650 - acc: 0.0855 - val_loss: 2.7323 - val_acc: 0.0809 - 6s/epoch - 6ms/sample
Epoch 11/100
1088/1088 - 6s - loss: 2.7404 - acc: 0.0965 - val_loss: 2.6994 - val_acc: 0.1397 - 6s/epoch - 6ms/sample
Epoch 12/100
1088/1088 - 7s - loss: 2.7207 - acc: 0.0993 - val_loss: 2.6648 - val_acc: 0.1360 - 7s/epoch - 6ms/sample
Epoch 13/100
1088/1088 - 7s - loss: 2.6738 - acc: 0.1250 - val_loss: 2.6240 - val_acc: 0.1507 - 7s/epoch - 6ms/sample
Epoch 14/100
1088/1088 - 7s - loss: 2.6491 - acc: 0.1241 - val_loss: 2.5866 - val_acc: 0.1471 - 7s/epoch - 6ms/sample
Epoch 15/100
1088/1088 - 6s - loss: 2.5965 - acc: 0.1471 - val_loss: 2.5445 - val_acc: 0.1397 - 6s/epoch - 6ms/sample
Epoch 16/100
1088/1088 - 7s - loss: 2.5632 - acc: 0.1379 - val_loss: 2.5037 - val_acc: 0.1544 - 7s/epoch - 6ms/sample
Epoch 17/100
1088/1088 - 7s - loss: 2.5398 - acc: 0.1480 - val_loss: 2.4746 - val_acc: 0.1765 - 7s/epoch - 6ms/sample
Epoch 18/100
1088/1088 - 6s - loss: 2.5146 - acc: 0.1664 - val_loss: 2.4481 - val_acc: 0.1838 - 6s/epoch - 6ms/sample
Epoch 19/100
1088/1088 - 6s - loss: 2.4725 - acc: 0.1645 - val_loss: 2.4178 - val_acc: 0.1875 - 6s/epoch - 6ms/sample
Epoch 20/100
1088/1088 - 6s - loss: 2.4562 - acc: 0.1544 - val_loss: 2.3970 - val_acc: 0.1985 - 6s/epoch - 6ms/sample
Epoch 21/100
1088/1088 - 6s - loss: 2.4314 - acc: 0.1719 - val_loss: 2.3748 - val_acc: 0.2022 - 6s/epoch - 6ms/sample
Epoch 22/100
1088/1088 - 6s - loss: 2.4063 - acc: 0.1572 - val_loss: 2.3521 - val_acc: 0.1985 - 6s/epoch - 5ms/sample
Epoch 23/100
1088/1088 - 6s - loss: 2.3825 - acc: 0.1792 - val_loss: 2.3376 - val_acc: 0.1912 - 6s/epoch - 6ms/sample
Epoch 24/100
1088/1088 - 7s - loss: 2.3776 - acc: 0.1792 - val_loss: 2.3193 - val_acc: 0.1912 - 7s/epoch - 6ms/sample
Epoch 25/100
1088/1088 - 6s - loss: 2.3463 - acc: 0.1893 - val_loss: 2.3073 - val_acc: 0.1912 - 6s/epoch - 6ms/sample
Epoch 26/100
1088/1088 - 6s - loss: 2.3298 - acc: 0.1829 - val_loss: 2.2967 - val_acc: 0.1949 - 6s/epoch - 6ms/sample
Epoch 27/100
1088/1088 - 6s - loss: 2.2823 - acc: 0.1912 - val_loss: 2.2669 - val_acc: 0.2022 - 6s/epoch - 6ms/sample
Epoch 28/100
1088/1088 - 6s - loss: 2.2796 - acc: 0.2031 - val_loss: 2.2585 - val_acc: 0.1949 - 6s/epoch - 6ms/sample
Epoch 29/100
1088/1088 - 6s - loss: 2.2637 - acc: 0.2151 - val_loss: 2.2462 - val_acc: 0.2169 - 6s/epoch - 6ms/sample
Epoch 30/100
1088/1088 - 6s - loss: 2.2397 - acc: 0.2197 - val_loss: 2.2302 - val_acc: 0.2132 - 6s/epoch - 6ms/sample
Epoch 31/100
1088/1088 - 7s - loss: 2.2426 - acc: 0.1994 - val_loss: 2.2172 - val_acc: 0.2279 - 7s/epoch - 6ms/sample
Epoch 32/100
1088/1088 - 6s - loss: 2.2092 - acc: 0.2243 - val_loss: 2.2028 - val_acc: 0.2353 - 6s/epoch - 5ms/sample
Epoch 33/100
1088/1088 - 7s - loss: 2.1997 - acc: 0.2077 - val_loss: 2.1886 - val_acc: 0.2574 - 7s/epoch - 6ms/sample
Epoch 34/100
1088/1088 - 6s - loss: 2.1640 - acc: 0.2252 - val_loss: 2.1898 - val_acc: 0.2574 - 6s/epoch - 6ms/sample
Epoch 35/100
1088/1088 - 6s - loss: 2.1678 - acc: 0.2261 - val_loss: 2.1761 - val_acc: 0.2647 - 6s/epoch - 6ms/sample
Epoch 36/100
1088/1088 - 6s - loss: 2.1547 - acc: 0.2224 - val_loss: 2.1533 - val_acc: 0.2647 - 6s/epoch - 6ms/sample
Epoch 37/100
1088/1088 - 6s - loss: 2.1261 - acc: 0.2472 - val_loss: 2.1624 - val_acc: 0.2574 - 6s/epoch - 6ms/sample
Epoch 38/100
1088/1088 - 6s - loss: 2.1226 - acc: 0.2353 - val_loss: 2.1405 - val_acc: 0.2757 - 6s/epoch - 6ms/sample
Epoch 39/100
1088/1088 - 6s - loss: 2.1130 - acc: 0.2436 - val_loss: 2.1384 - val_acc: 0.2794 - 6s/epoch - 6ms/sample
Epoch 40/100
1088/1088 - 6s - loss: 2.0921 - acc: 0.2564 - val_loss: 2.1195 - val_acc: 0.2904 - 6s/epoch - 5ms/sample
Epoch 41/100
1088/1088 - 6s - loss: 2.0808 - acc: 0.2472 - val_loss: 2.1115 - val_acc: 0.2831 - 6s/epoch - 6ms/sample
Epoch 42/100
1088/1088 - 6s - loss: 2.0549 - acc: 0.2702 - val_loss: 2.1015 - val_acc: 0.2684 - 6s/epoch - 5ms/sample
Epoch 43/100
1088/1088 - 6s - loss: 2.0272 - acc: 0.2831 - val_loss: 2.0856 - val_acc: 0.2831 - 6s/epoch - 5ms/sample
Epoch 44/100
1088/1088 - 6s - loss: 1.9966 - acc: 0.2785 - val_loss: 2.0855 - val_acc: 0.2794 - 6s/epoch - 5ms/sample
Epoch 45/100
1088/1088 - 6s - loss: 1.9862 - acc: 0.2730 - val_loss: 2.0642 - val_acc: 0.2868 - 6s/epoch - 5ms/sample
Epoch 46/100
1088/1088 - 5s - loss: 1.9906 - acc: 0.2767 - val_loss: 2.0533 - val_acc: 0.2794 - 5s/epoch - 5ms/sample
Epoch 47/100
1088/1088 - 5s - loss: 1.9707 - acc: 0.2537 - val_loss: 2.0410 - val_acc: 0.2794 - 5s/epoch - 5ms/sample
Epoch 48/100
1088/1088 - 6s - loss: 1.9448 - acc: 0.3070 - val_loss: 2.0480 - val_acc: 0.2757 - 6s/epoch - 5ms/sample
Epoch 49/100
1088/1088 - 5s - loss: 1.9306 - acc: 0.2849 - val_loss: 2.0392 - val_acc: 0.2794 - 5s/epoch - 5ms/sample
Epoch 50/100
1088/1088 - 5s - loss: 1.9329 - acc: 0.2776 - val_loss: 2.0225 - val_acc: 0.2904 - 5s/epoch - 5ms/sample
Epoch 51/100
1088/1088 - 6s - loss: 1.9013 - acc: 0.3088 - val_loss: 2.0211 - val_acc: 0.2794 - 6s/epoch - 5ms/sample
Epoch 52/100
1088/1088 - 5s - loss: 1.8865 - acc: 0.3042 - val_loss: 2.0002 - val_acc: 0.2794 - 5s/epoch - 5ms/sample
Epoch 53/100
1088/1088 - 5s - loss: 1.8563 - acc: 0.3300 - val_loss: 1.9984 - val_acc: 0.2978 - 5s/epoch - 5ms/sample
Epoch 54/100
1088/1088 - 6s - loss: 1.8354 - acc: 0.3281 - val_loss: 2.0022 - val_acc: 0.2831 - 6s/epoch - 5ms/sample
Epoch 55/100
1088/1088 - 6s - loss: 1.8196 - acc: 0.3428 - val_loss: 1.9695 - val_acc: 0.2868 - 6s/epoch - 5ms/sample
Epoch 56/100
1088/1088 - 6s - loss: 1.7931 - acc: 0.3373 - val_loss: 1.9871 - val_acc: 0.3015 - 6s/epoch - 5ms/sample
Epoch 57/100
1088/1088 - 6s - loss: 1.8005 - acc: 0.3327 - val_loss: 2.0229 - val_acc: 0.3199 - 6s/epoch - 5ms/sample
Epoch 58/100
1088/1088 - 6s - loss: 1.7729 - acc: 0.3566 - val_loss: 1.9582 - val_acc: 0.3051 - 6s/epoch - 5ms/sample
Epoch 59/100
1088/1088 - 6s - loss: 1.7838 - acc: 0.3511 - val_loss: 1.9416 - val_acc: 0.2941 - 6s/epoch - 5ms/sample
Epoch 60/100
1088/1088 - 5s - loss: 1.7176 - acc: 0.3520 - val_loss: 1.9356 - val_acc: 0.2978 - 5s/epoch - 5ms/sample
Epoch 61/100
1088/1088 - 6s - loss: 1.7155 - acc: 0.3539 - val_loss: 1.9250 - val_acc: 0.3088 - 6s/epoch - 5ms/sample
Epoch 62/100
1088/1088 - 6s - loss: 1.7276 - acc: 0.3456 - val_loss: 1.9330 - val_acc: 0.3272 - 6s/epoch - 5ms/sample
Epoch 63/100
1088/1088 - 6s - loss: 1.7032 - acc: 0.3667 - val_loss: 1.9261 - val_acc: 0.3309 - 6s/epoch - 5ms/sample
Epoch 64/100
1088/1088 - 6s - loss: 1.6733 - acc: 0.3787 - val_loss: 1.9178 - val_acc: 0.3346 - 6s/epoch - 5ms/sample
Epoch 65/100
1088/1088 - 5s - loss: 1.6671 - acc: 0.3842 - val_loss: 1.9118 - val_acc: 0.3529 - 5s/epoch - 5ms/sample
Epoch 66/100
1088/1088 - 5s - loss: 1.6183 - acc: 0.3897 - val_loss: 1.9047 - val_acc: 0.3493 - 5s/epoch - 5ms/sample
Epoch 67/100
1088/1088 - 5s - loss: 1.6283 - acc: 0.3787 - val_loss: 1.9011 - val_acc: 0.3419 - 5s/epoch - 5ms/sample
Epoch 68/100
1088/1088 - 5s - loss: 1.5913 - acc: 0.4090 - val_loss: 1.9146 - val_acc: 0.3603 - 5s/epoch - 5ms/sample
Epoch 69/100
1088/1088 - 5s - loss: 1.5899 - acc: 0.4154 - val_loss: 1.8999 - val_acc: 0.3529 - 5s/epoch - 5ms/sample
Epoch 70/100
1088/1088 - 6s - loss: 1.5530 - acc: 0.4320 - val_loss: 1.9076 - val_acc: 0.3750 - 6s/epoch - 5ms/sample
Epoch 71/100
1088/1088 - 6s - loss: 1.5626 - acc: 0.4081 - val_loss: 1.8806 - val_acc: 0.3456 - 6s/epoch - 5ms/sample
Epoch 72/100
1088/1088 - 5s - loss: 1.5164 - acc: 0.4504 - val_loss: 1.8986 - val_acc: 0.3676 - 5s/epoch - 5ms/sample
Epoch 73/100
1088/1088 - 6s - loss: 1.5585 - acc: 0.4265 - val_loss: 1.8936 - val_acc: 0.3603 - 6s/epoch - 5ms/sample
Epoch 74/100
1088/1088 - 5s - loss: 1.4974 - acc: 0.4375 - val_loss: 1.8959 - val_acc: 0.3603 - 5s/epoch - 5ms/sample
Epoch 75/100
1088/1088 - 6s - loss: 1.4698 - acc: 0.4577 - val_loss: 1.9013 - val_acc: 0.3824 - 6s/epoch - 5ms/sample
Epoch 76/100
1088/1088 - 6s - loss: 1.4537 - acc: 0.4550 - val_loss: 1.8841 - val_acc: 0.3640 - 6s/epoch - 5ms/sample
Epoch 77/100
1088/1088 - 5s - loss: 1.4675 - acc: 0.4449 - val_loss: 1.8734 - val_acc: 0.3713 - 5s/epoch - 5ms/sample
Epoch 78/100
1088/1088 - 5s - loss: 1.4302 - acc: 0.4568 - val_loss: 1.8781 - val_acc: 0.3750 - 5s/epoch - 5ms/sample
Epoch 79/100
1088/1088 - 5s - loss: 1.4470 - acc: 0.4596 - val_loss: 1.8718 - val_acc: 0.3824 - 5s/epoch - 5ms/sample
Epoch 80/100
1088/1088 - 6s - loss: 1.4157 - acc: 0.4743 - val_loss: 1.8750 - val_acc: 0.3787 - 6s/epoch - 5ms/sample
Epoch 81/100
1088/1088 - 6s - loss: 1.4405 - acc: 0.4531 - val_loss: 1.8756 - val_acc: 0.3934 - 6s/epoch - 5ms/sample
Epoch 82/100
1088/1088 - 6s - loss: 1.3992 - acc: 0.4614 - val_loss: 1.9151 - val_acc: 0.3493 - 6s/epoch - 5ms/sample
Epoch 83/100
1088/1088 - 6s - loss: 1.3808 - acc: 0.5000 - val_loss: 1.8804 - val_acc: 0.3824 - 6s/epoch - 5ms/sample
Epoch 84/100
1088/1088 - 6s - loss: 1.3802 - acc: 0.4881 - val_loss: 1.8641 - val_acc: 0.3860 - 6s/epoch - 5ms/sample
Epoch 85/100
1088/1088 - 6s - loss: 1.3446 - acc: 0.4963 - val_loss: 1.8887 - val_acc: 0.3934 - 6s/epoch - 5ms/sample
Epoch 86/100
1088/1088 - 6s - loss: 1.3810 - acc: 0.4724 - val_loss: 1.9126 - val_acc: 0.3824 - 6s/epoch - 5ms/sample
Epoch 87/100
1088/1088 - 6s - loss: 1.3164 - acc: 0.5129 - val_loss: 1.9062 - val_acc: 0.3860 - 6s/epoch - 5ms/sample
Epoch 88/100
1088/1088 - 6s - loss: 1.3110 - acc: 0.5009 - val_loss: 1.8741 - val_acc: 0.3860 - 6s/epoch - 5ms/sample
Epoch 89/100
1088/1088 - 5s - loss: 1.2827 - acc: 0.5248 - val_loss: 1.8788 - val_acc: 0.3824 - 5s/epoch - 5ms/sample
Epoch 90/100
1088/1088 - 6s - loss: 1.2980 - acc: 0.5147 - val_loss: 1.9774 - val_acc: 0.4007 - 6s/epoch - 5ms/sample
Epoch 91/100
1088/1088 - 5s - loss: 1.2930 - acc: 0.5322 - val_loss: 1.8857 - val_acc: 0.3860 - 5s/epoch - 5ms/sample
Epoch 92/100
1088/1088 - 6s - loss: 1.2372 - acc: 0.5331 - val_loss: 1.8978 - val_acc: 0.4081 - 6s/epoch - 5ms/sample
Epoch 93/100
1088/1088 - 5s - loss: 1.2653 - acc: 0.5377 - val_loss: 1.9014 - val_acc: 0.3860 - 5s/epoch - 5ms/sample
Epoch 94/100
1088/1088 - 6s - loss: 1.2318 - acc: 0.5377 - val_loss: 1.9201 - val_acc: 0.3824 - 6s/epoch - 5ms/sample
Epoch 95/100
1088/1088 - 6s - loss: 1.2539 - acc: 0.5423 - val_loss: 1.9073 - val_acc: 0.3750 - 6s/epoch - 5ms/sample
Epoch 96/100
1088/1088 - 6s - loss: 1.2343 - acc: 0.5487 - val_loss: 1.8892 - val_acc: 0.3897 - 6s/epoch - 5ms/sample
Epoch 97/100
1088/1088 - 5s - loss: 1.1975 - acc: 0.5469 - val_loss: 1.8959 - val_acc: 0.3934 - 5s/epoch - 5ms/sample
Epoch 98/100
1088/1088 - 5s - loss: 1.2111 - acc: 0.5496 - val_loss: 1.9089 - val_acc: 0.4044 - 5s/epoch - 5ms/sample
Epoch 99/100
1088/1088 - 5s - loss: 1.1697 - acc: 0.5478 - val_loss: 1.9001 - val_acc: 0.3934 - 5s/epoch - 5ms/sample
Epoch 100/100
1088/1088 - 5s - loss: 1.1582 - acc: 0.5735 - val_loss: 1.8914 - val_acc: 0.3934 - 5s/epoch - 5ms/sample
oxf_nn_model_results = oxf_nn_model.evaluate(X_test_nn, y_test)
print('Test loss: ', oxf_nn_model_results[0])
print('Test accuracy: ', oxf_nn_model_results[1])
Test loss:  1.8913742864833158
Test accuracy:  0.39338234
loss_train = oxf_nn_model_fitted.history['loss']
loss_val = oxf_nn_model_fitted.history['val_loss']
epochs = range(1,EPOCH+1)
plt.plot(epochs, loss_train, 'g', label='Training loss')
plt.plot(epochs, loss_val, 'b', label='validation loss')
plt.title('Training and Validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
Acc_train = oxf_nn_model_fitted.history['acc']
Acc_val = oxf_nn_model_fitted.history['val_acc']
epochs = range(1,EPOCH+1)
plt.plot(epochs, Acc_train, 'g', label='Training accuracy')
plt.plot(epochs, Acc_val, 'b', label='validation accuracy')
plt.title('Training and Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()
The trained neural network reaches a test accuracy of 39.33%, a modest improvement over the supervised learning baseline, the logistic regression model (37.13%).
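A quick bar chart keeps the running comparison in one place as more models are added; a minimal sketch using the two test accuracies computed above:
# Sketch: compare test accuracies of the oxflower17 models trained so far
model_names = ['Logistic Regression', 'Dense NN']
test_accs = [lr_model_acc_test, oxf_nn_model_results[1]]
plt.bar(model_names, test_accs)
plt.ylabel('Test accuracy')
plt.title('oxflower17 model comparison')
plt.show()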
# create the CNN model for the oxflower17 data
num_classes=num_class_train
def create_oxf_cnn_model():
# Initialising the CNN classifier
oxf_cnn_model = Sequential()
# Add a Convolution layer with 32 kernels of 3X3 shape with activation function ReLU
oxf_cnn_model.add(Conv2D(32, (3, 3), input_shape = (224, 224, 3), activation = 'relu',padding='same'))
oxf_cnn_model.add(BatchNormalization(axis=3))
# Add a Max Pooling layer of size 2X2
oxf_cnn_model.add(MaxPooling2D(pool_size = (2, 2)))
# Add a Convolution layer with 64 kernels of 3X3 shape with activation function ReLU
oxf_cnn_model.add(Conv2D(64, (3, 3), activation = 'relu',padding='same'))
oxf_cnn_model.add(BatchNormalization(axis=3))
# Add a Max Pooling layer of size 2X2
oxf_cnn_model.add(MaxPooling2D(pool_size = (2, 2)))
# Add another Convolution layer with 128 kernels of 3X3 shape with activation function ReLU
oxf_cnn_model.add(Conv2D(128, (3, 3), activation = 'relu',padding='same'))
# Adding another pooling layer
oxf_cnn_model.add(MaxPooling2D(pool_size = (2, 2)))
oxf_cnn_model.add(BatchNormalization(axis=3))
oxf_cnn_model.add(Dropout(0.25))
# Add another Convolution layer with 256 kernels of 3X3 shape with activation function ReLU
oxf_cnn_model.add(Conv2D(256, (3, 3), activation = 'relu',padding='same'))
# Adding another pooling layer
oxf_cnn_model.add(MaxPooling2D(pool_size = (2, 2)))
oxf_cnn_model.add(BatchNormalization(axis=3))
oxf_cnn_model.add(Dropout(0.25))
# Add another Convolution layer with 512 kernels of 3X3 shape with activation function ReLU
oxf_cnn_model.add(Conv2D(512, (3, 3), activation = 'relu', padding='same'))
# Adding another pooling layer
oxf_cnn_model.add(MaxPooling2D(pool_size = (2, 2)))
oxf_cnn_model.add(BatchNormalization(axis=3))
oxf_cnn_model.add(Dropout(0.25))
# Flattening the layer before fully connected layers
oxf_cnn_model.add(Flatten())
# The final output layer with 17 neuron to predict the categorical classifcation
oxf_cnn_model.add(Dense(units = 17, activation = 'softmax'))
# Compile the network :
optimizer = tf.keras.optimizers.Adam()
oxf_cnn_model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
oxf_cnn_model.summary()
return oxf_cnn_model
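As a quick sanity check on the architecture (a side note, not part of the original notebook): the five 2X2 poolings shrink the 224X224 input to 7X7 feature maps with 512 channels, so Flatten emits 7*7*512 = 25,088 features and the 17-way softmax layer holds 25088*17 + 17 = 426,513 parameters, matching the summary printed below.
# Verify the dense-layer parameter count implied by the architecture
flat_features = 7 * 7 * 512              # feature map size after five 2x2 poolings
dense_params = flat_features * 17 + 17   # weights plus biases of the softmax layer
print(flat_features, dense_params)       # 25088 426513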
# Tuning observations (batch size b, epochs e -> test accuracy):
#   b=100, e=100 -> 0.7242
#   b=80,  e=200 -> 0.7352
epoch = 100
batch_size = 80
# Halve the learning rate whenever val_acc has not improved for 6 consecutive epochs (floor 1e-5)
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=6, verbose=1, factor=0.5, min_lr=0.00001)
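The "ReduceLROnPlateau reducing learning rate" messages in the training log below come from this callback. A natural companion, sketched here purely as a hedged option (it was not used in this run), is EarlyStopping, which would halt training once validation accuracy stops improving instead of running the full 100 epochs:
# Optional sketch (not used in this run): stop early once val_acc stalls for 15 epochs
# and roll back to the best weights seen so far.
from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='val_acc', patience=15, restore_best_weights=True, verbose=1)
# Passing callbacks=[learning_rate_reduction, early_stopping] to fit() would activate it.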
# Train the CNN classifier model on the 17-flowers data
oxf_cnn_model = create_oxf_cnn_model()
oxf_cnn_model_fitted = oxf_cnn_model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epoch, batch_size=batch_size,verbose=2,callbacks=[learning_rate_reduction])
WARNING:tensorflow:From C:\Users\srile\anaconda3\lib\site-packages\keras\layers\normalization\batch_normalization.py:514: _colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
Model: "sequential_2"
_________________________________________________________________
Layer (type)                               Output Shape           Param #
=================================================================
conv2d_3 (Conv2D)                          (None, 224, 224, 32)   896
batch_normalization_5 (BatchNormalization) (None, 224, 224, 32)   128
max_pooling2d_3 (MaxPooling2D)             (None, 112, 112, 32)   0
conv2d_4 (Conv2D)                          (None, 112, 112, 64)   18496
batch_normalization_6 (BatchNormalization) (None, 112, 112, 64)   256
max_pooling2d_4 (MaxPooling2D)             (None, 56, 56, 64)     0
conv2d_5 (Conv2D)                          (None, 56, 56, 128)    73856
max_pooling2d_5 (MaxPooling2D)             (None, 28, 28, 128)    0
batch_normalization_7 (BatchNormalization) (None, 28, 28, 128)    512
dropout_7 (Dropout)                        (None, 28, 28, 128)    0
conv2d_6 (Conv2D)                          (None, 28, 28, 256)    295168
max_pooling2d_6 (MaxPooling2D)             (None, 14, 14, 256)    0
batch_normalization_8 (BatchNormalization) (None, 14, 14, 256)    1024
dropout_8 (Dropout)                        (None, 14, 14, 256)    0
conv2d_7 (Conv2D)                          (None, 14, 14, 512)    1180160
max_pooling2d_7 (MaxPooling2D)             (None, 7, 7, 512)      0
batch_normalization_9 (BatchNormalization) (None, 7, 7, 512)      2048
dropout_9 (Dropout)                        (None, 7, 7, 512)      0
flatten_1 (Flatten)                        (None, 25088)          0
dense_10 (Dense)                           (None, 17)             426513
=================================================================
Total params: 1,999,057
Trainable params: 1,997,073
Non-trainable params: 1,984
_________________________________________________________________
Train on 1088 samples, validate on 272 samples
Epoch 1/100
1088/1088 - 117s - loss: 3.3672 - acc: 0.3787 - val_loss: 2.9420 - val_acc: 0.0699 - lr: 0.0010 - 117s/epoch - 107ms/sample
Epoch 2/100
1088/1088 - 121s - loss: 1.5230 - acc: 0.6526 - val_loss: 4.0161 - val_acc: 0.0956 - lr: 0.0010 - 121s/epoch - 112ms/sample
Epoch 3/100
1088/1088 - 114s - loss: 1.0731 - acc: 0.7353 - val_loss: 5.4606 - val_acc: 0.0919 - lr: 0.0010 - 114s/epoch - 104ms/sample
Epoch 4/100
1088/1088 - 125s - loss: 0.7529 - acc: 0.8125 - val_loss: 6.5371 - val_acc: 0.0662 - lr: 0.0010 - 125s/epoch - 115ms/sample
Epoch 5/100
1088/1088 - 135s - loss: 0.4659 - acc: 0.8768 - val_loss: 6.0887 - val_acc: 0.1103 - lr: 0.0010 - 135s/epoch - 124ms/sample
Epoch 6/100
1088/1088 - 118s - loss: 0.3045 - acc: 0.9118 - val_loss: 6.7492 - val_acc: 0.0625 - lr: 0.0010 - 118s/epoch - 109ms/sample
Epoch 7/100
1088/1088 - 113s - loss: 0.2119 - acc: 0.9301 - val_loss: 7.2865 - val_acc: 0.0662 - lr: 0.0010 - 113s/epoch - 104ms/sample
Epoch 8/100
1088/1088 - 113s - loss: 0.1648 - acc: 0.9449 - val_loss: 8.9379 - val_acc: 0.0588 - lr: 0.0010 - 113s/epoch - 104ms/sample
Epoch 9/100
1088/1088 - 115s - loss: 0.1488 - acc: 0.9586 - val_loss: 9.2747 - val_acc: 0.0772 - lr: 0.0010 - 115s/epoch - 106ms/sample
Epoch 10/100
1088/1088 - 113s - loss: 0.1447 - acc: 0.9531 - val_loss: 7.1547 - val_acc: 0.1397 - lr: 0.0010 - 113s/epoch - 104ms/sample
Epoch 11/100
1088/1088 - 112s - loss: 0.1353 - acc: 0.9623 - val_loss: 10.9172 - val_acc: 0.0588 - lr: 0.0010 - 112s/epoch - 103ms/sample
Epoch 12/100
1088/1088 - 114s - loss: 0.0883 - acc: 0.9770 - val_loss: 10.2580 - val_acc: 0.0625 - lr: 0.0010 - 114s/epoch - 105ms/sample
Epoch 13/100
1088/1088 - 112s - loss: 0.0847 - acc: 0.9706 - val_loss: 6.9594 - val_acc: 0.1176 - lr: 0.0010 - 112s/epoch - 103ms/sample
Epoch 14/100
1088/1088 - 118s - loss: 0.0470 - acc: 0.9835 - val_loss: 8.4379 - val_acc: 0.1397 - lr: 0.0010 - 118s/epoch - 108ms/sample
Epoch 15/100
1088/1088 - 119s - loss: 0.0492 - acc: 0.9844 - val_loss: 9.2077 - val_acc: 0.0919 - lr: 0.0010 - 119s/epoch - 110ms/sample
Epoch 16/100
Epoch 16: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
1088/1088 - 113s - loss: 0.0534 - acc: 0.9807 - val_loss: 9.4374 - val_acc: 0.1176 - lr: 0.0010 - 113s/epoch - 104ms/sample
Epoch 17/100
1088/1088 - 115s - loss: 0.0196 - acc: 0.9945 - val_loss: 8.4978 - val_acc: 0.1618 - lr: 5.0000e-04 - 115s/epoch - 106ms/sample
Epoch 18/100
1088/1088 - 114s - loss: 0.0057 - acc: 0.9982 - val_loss: 6.6895 - val_acc: 0.2059 - lr: 5.0000e-04 - 114s/epoch - 104ms/sample
Epoch 19/100
1088/1088 - 112s - loss: 0.0056 - acc: 0.9991 - val_loss: 6.4249 - val_acc: 0.2169 - lr: 5.0000e-04 - 112s/epoch - 103ms/sample
Epoch 20/100
1088/1088 - 125s - loss: 0.0069 - acc: 0.9982 - val_loss: 6.3420 - val_acc: 0.2243 - lr: 5.0000e-04 - 125s/epoch - 115ms/sample
Epoch 21/100
1088/1088 - 119s - loss: 0.0045 - acc: 0.9982 - val_loss: 6.2881 - val_acc: 0.2353 - lr: 5.0000e-04 - 119s/epoch - 109ms/sample
Epoch 22/100
1088/1088 - 113s - loss: 8.7733e-04 - acc: 1.0000 - val_loss: 6.1640 - val_acc: 0.2574 - lr: 5.0000e-04 - 113s/epoch - 104ms/sample
Epoch 23/100
1088/1088 - 113s - loss: 0.0027 - acc: 0.9991 - val_loss: 5.7952 - val_acc: 0.2757 - lr: 5.0000e-04 - 113s/epoch - 104ms/sample
Epoch 24/100
1088/1088 - 111s - loss: 5.8833e-04 - acc: 1.0000 - val_loss: 5.7286 - val_acc: 0.2831 - lr: 5.0000e-04 - 111s/epoch - 102ms/sample
Epoch 25/100
1088/1088 - 113s - loss: 0.0019 - acc: 0.9991 - val_loss: 5.5799 - val_acc: 0.2904 - lr: 5.0000e-04 - 113s/epoch - 103ms/sample
Epoch 26/100
1088/1088 - 111s - loss: 6.0517e-04 - acc: 1.0000 - val_loss: 5.3797 - val_acc: 0.3125 - lr: 5.0000e-04 - 111s/epoch - 102ms/sample
Epoch 27/100
1088/1088 - 110s - loss: 5.0718e-04 - acc: 1.0000 - val_loss: 5.1415 - val_acc: 0.3088 - lr: 5.0000e-04 - 110s/epoch - 101ms/sample
Epoch 28/100
1088/1088 - 111s - loss: 9.5554e-04 - acc: 1.0000 - val_loss: 4.7918 - val_acc: 0.3235 - lr: 5.0000e-04 - 111s/epoch - 102ms/sample
Epoch 29/100
1088/1088 - 112s - loss: 3.8053e-04 - acc: 1.0000 - val_loss: 4.6111 - val_acc: 0.3235 - lr: 5.0000e-04 - 112s/epoch - 103ms/sample
Epoch 30/100
1088/1088 - 112s - loss: 4.8984e-04 - acc: 1.0000 - val_loss: 4.5480 - val_acc: 0.3456 - lr: 5.0000e-04 - 112s/epoch - 103ms/sample
Epoch 31/100
1088/1088 - 115s - loss: 2.7729e-04 - acc: 1.0000 - val_loss: 4.3452 - val_acc: 0.3713 - lr: 5.0000e-04 - 115s/epoch - 106ms/sample
Epoch 32/100
1088/1088 - 111s - loss: 3.2144e-04 - acc: 1.0000 - val_loss: 4.1131 - val_acc: 0.3934 - lr: 5.0000e-04 - 111s/epoch - 102ms/sample
Epoch 33/100
1088/1088 - 110s - loss: 3.9222e-04 - acc: 1.0000 - val_loss: 3.7552 - val_acc: 0.4081 - lr: 5.0000e-04 - 110s/epoch - 101ms/sample
Epoch 34/100
1088/1088 - 113s - loss: 2.9567e-04 - acc: 1.0000 - val_loss: 3.5179 - val_acc: 0.4265 - lr: 5.0000e-04 - 113s/epoch - 104ms/sample
Epoch 35/100
1088/1088 - 110s - loss: 1.6256e-04 - acc: 1.0000 - val_loss: 3.3295 - val_acc: 0.4449 - lr: 5.0000e-04 - 110s/epoch - 101ms/sample
Epoch 36/100
1088/1088 - 111s - loss: 2.1471e-04 - acc: 1.0000 - val_loss: 3.1559 - val_acc: 0.4706 - lr: 5.0000e-04 - 111s/epoch - 102ms/sample
Epoch 37/100
1088/1088 - 110s - loss: 3.0647e-04 - acc: 1.0000 - val_loss: 3.0005 - val_acc: 0.4890 - lr: 5.0000e-04 - 110s/epoch - 101ms/sample
Epoch 38/100
1088/1088 - 110s - loss: 4.4477e-04 - acc: 1.0000 - val_loss: 2.8491 - val_acc: 0.5147 - lr: 5.0000e-04 - 110s/epoch - 101ms/sample
Epoch 39/100
1088/1088 - 111s - loss: 3.3389e-04 - acc: 1.0000 - val_loss: 2.7228 - val_acc: 0.5551 - lr: 5.0000e-04 - 111s/epoch - 102ms/sample
Epoch 40/100
1088/1088 - 110s - loss: 2.0268e-04 - acc: 1.0000 - val_loss: 2.6096 - val_acc: 0.5846 - lr: 5.0000e-04 - 110s/epoch - 101ms/sample
Epoch 41/100
1088/1088 - 110s - loss: 2.7025e-04 - acc: 1.0000 - val_loss: 2.5080 - val_acc: 0.5956 - lr: 5.0000e-04 - 110s/epoch - 101ms/sample
Epoch 42/100
1088/1088 - 113s - loss: 1.3332e-04 - acc: 1.0000 - val_loss: 2.4268 - val_acc: 0.6176 - lr: 5.0000e-04 - 113s/epoch - 103ms/sample
Epoch 43/100
1088/1088 - 111s - loss: 2.5303e-04 - acc: 1.0000 - val_loss: 2.3587 - val_acc: 0.6287 - lr: 5.0000e-04 - 111s/epoch - 102ms/sample
Epoch 44/100
1088/1088 - 113s - loss: 4.5941e-04 - acc: 1.0000 - val_loss: 2.3420 - val_acc: 0.6507 - lr: 5.0000e-04 - 113s/epoch - 104ms/sample
Epoch 45/100
1088/1088 - 112s - loss: 2.1033e-04 - acc: 1.0000 - val_loss: 2.3099 - val_acc: 0.6544 - lr: 5.0000e-04 - 112s/epoch - 103ms/sample
Epoch 46/100
1088/1088 - 111s - loss: 2.5952e-04 - acc: 1.0000 - val_loss: 2.2690 - val_acc: 0.6618 - lr: 5.0000e-04 - 111s/epoch - 102ms/sample
Epoch 47/100
1088/1088 - 112s - loss: 3.0449e-04 - acc: 1.0000 - val_loss: 2.2322 - val_acc: 0.6765 - lr: 5.0000e-04 - 112s/epoch - 103ms/sample
Epoch 48/100
1088/1088 - 111s - loss: 2.8031e-04 - acc: 1.0000 - val_loss: 2.2067 - val_acc: 0.6912 - lr: 5.0000e-04 - 111s/epoch - 102ms/sample
Epoch 49/100
1088/1088 - 109s - loss: 1.4907e-04 - acc: 1.0000 - val_loss: 2.1723 - val_acc: 0.6912 - lr: 5.0000e-04 - 109s/epoch - 101ms/sample
Epoch 50/100
1088/1088 - 111s - loss: 1.2490e-04 - acc: 1.0000 - val_loss: 2.1581 - val_acc: 0.6949 - lr: 5.0000e-04 - 111s/epoch - 102ms/sample
Epoch 51/100
1088/1088 - 110s - loss: 1.0002e-04 - acc: 1.0000 - val_loss: 2.1519 - val_acc: 0.6949 - lr: 5.0000e-04 - 110s/epoch - 102ms/sample
Epoch 52/100
1088/1088 - 110s - loss: 1.6471e-04 - acc: 1.0000 - val_loss: 2.1482 - val_acc: 0.6949 - lr: 5.0000e-04 - 110s/epoch - 101ms/sample
Epoch 53/100
1088/1088 - 110s - loss: 2.3092e-04 - acc: 1.0000 - val_loss: 2.1359 - val_acc: 0.6949 - lr: 5.0000e-04 - 110s/epoch - 102ms/sample
Epoch 54/100
1088/1088 - 109s - loss: 1.1127e-04 - acc: 1.0000 - val_loss: 2.1320 - val_acc: 0.6912 - lr: 5.0000e-04 - 109s/epoch - 101ms/sample
Epoch 55/100
1088/1088 - 110s - loss: 1.6733e-04 - acc: 1.0000 - val_loss: 2.1317 - val_acc: 0.6875 - lr: 5.0000e-04 - 110s/epoch - 101ms/sample
Epoch 56/100
Epoch 56: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
1088/1088 - 112s - loss: 1.0964e-04 - acc: 1.0000 - val_loss: 2.1310 - val_acc: 0.6912 - lr: 5.0000e-04 - 112s/epoch - 103ms/sample
Epoch 57/100
1088/1088 - 110s - loss: 1.3582e-04 - acc: 1.0000 - val_loss: 2.1331 - val_acc: 0.6912 - lr: 2.5000e-04 - 110s/epoch - 101ms/sample
Epoch 58/100
1088/1088 - 110s - loss: 1.7867e-04 - acc: 1.0000 - val_loss: 2.1395 - val_acc: 0.6875 - lr: 2.5000e-04 - 110s/epoch - 101ms/sample
Epoch 59/100
1088/1088 - 110s - loss: 1.3933e-04 - acc: 1.0000 - val_loss: 2.1460 - val_acc: 0.6949 - lr: 2.5000e-04 - 110s/epoch - 101ms/sample
Epoch 60/100
1088/1088 - 110s - loss: 8.3670e-05 - acc: 1.0000 - val_loss: 2.1527 - val_acc: 0.6912 - lr: 2.5000e-04 - 110s/epoch - 101ms/sample
Epoch 61/100
1088/1088 - 112s - loss: 1.2149e-04 - acc: 1.0000 - val_loss: 2.1568 - val_acc: 0.6912 - lr: 2.5000e-04 - 112s/epoch - 103ms/sample
Epoch 62/100
Epoch 62: ReduceLROnPlateau reducing learning rate to 0.0001250000059371814.
1088/1088 - 110s - loss: 1.8871e-04 - acc: 1.0000 - val_loss: 2.1609 - val_acc: 0.6912 - lr: 2.5000e-04 - 110s/epoch - 101ms/sample
Epoch 63/100
1088/1088 - 112s - loss: 8.8051e-05 - acc: 1.0000 - val_loss: 2.1633 - val_acc: 0.6912 - lr: 1.2500e-04 - 112s/epoch - 103ms/sample
Epoch 64/100
1088/1088 - 110s - loss: 1.1755e-04 - acc: 1.0000 - val_loss: 2.1644 - val_acc: 0.6912 - lr: 1.2500e-04 - 110s/epoch - 101ms/sample
Epoch 65/100
1088/1088 - 111s - loss: 1.3317e-04 - acc: 1.0000 - val_loss: 2.1651 - val_acc: 0.6949 - lr: 1.2500e-04 - 111s/epoch - 102ms/sample
Epoch 66/100
1088/1088 - 111s - loss: 9.3311e-05 - acc: 1.0000 - val_loss: 2.1658 - val_acc: 0.6949 - lr: 1.2500e-04 - 111s/epoch - 102ms/sample
Epoch 67/100
1088/1088 - 110s - loss: 8.6697e-05 - acc: 1.0000 - val_loss: 2.1670 - val_acc: 0.7022 - lr: 1.2500e-04 - 110s/epoch - 101ms/sample
Epoch 68/100
1088/1088 - 110s - loss: 8.0951e-05 - acc: 1.0000 - val_loss: 2.1676 - val_acc: 0.7059 - lr: 1.2500e-04 - 110s/epoch - 101ms/sample
Epoch 69/100
1088/1088 - 110s - loss: 2.1272e-04 - acc: 1.0000 - val_loss: 2.1698 - val_acc: 0.7022 - lr: 1.2500e-04 - 110s/epoch - 101ms/sample
Epoch 70/100
1088/1088 - 110s - loss: 1.9016e-04 - acc: 1.0000 - val_loss: 2.1720 - val_acc: 0.6949 - lr: 1.2500e-04 - 110s/epoch - 101ms/sample
Epoch 71/100
1088/1088 - 110s - loss: 1.3497e-04 - acc: 1.0000 - val_loss: 2.1733 - val_acc: 0.6949 - lr: 1.2500e-04 - 110s/epoch - 101ms/sample
Epoch 72/100
1088/1088 - 110s - loss: 2.1177e-04 - acc: 1.0000 - val_loss: 2.1772 - val_acc: 0.6912 - lr: 1.2500e-04 - 110s/epoch - 101ms/sample
Epoch 73/100
1088/1088 - 109s - loss: 2.0591e-04 - acc: 1.0000 - val_loss: 2.1805 - val_acc: 0.6912 - lr: 1.2500e-04 - 109s/epoch - 100ms/sample
Epoch 74/100
Epoch 74: ReduceLROnPlateau reducing learning rate to 6.25000029685907e-05.
1088/1088 - 111s - loss: 1.1104e-04 - acc: 1.0000 - val_loss: 2.1815 - val_acc: 0.6949 - lr: 1.2500e-04 - 111s/epoch - 102ms/sample
Epoch 75/100
1088/1088 - 112s - loss: 1.2774e-04 - acc: 1.0000 - val_loss: 2.1807 - val_acc: 0.6949 - lr: 6.2500e-05 - 112s/epoch - 103ms/sample
Epoch 76/100
1088/1088 - 111s - loss: 1.3732e-04 - acc: 1.0000 - val_loss: 2.1802 - val_acc: 0.6912 - lr: 6.2500e-05 - 111s/epoch - 102ms/sample
Epoch 77/100
1088/1088 - 110s - loss: 9.0735e-05 - acc: 1.0000 - val_loss: 2.1801 - val_acc: 0.6912 - lr: 6.2500e-05 - 110s/epoch - 101ms/sample
Epoch 78/100
1088/1088 - 111s - loss: 1.5995e-04 - acc: 1.0000 - val_loss: 2.1797 - val_acc: 0.6912 - lr: 6.2500e-05 - 111s/epoch - 102ms/sample
Epoch 79/100
1088/1088 - 110s - loss: 1.6755e-04 - acc: 1.0000 - val_loss: 2.1810 - val_acc: 0.6912 - lr: 6.2500e-05 - 110s/epoch - 101ms/sample
Epoch 80/100
Epoch 80: ReduceLROnPlateau reducing learning rate to 3.125000148429535e-05.
1088/1088 - 110s - loss: 1.0040e-04 - acc: 1.0000 - val_loss: 2.1808 - val_acc: 0.6949 - lr: 6.2500e-05 - 110s/epoch - 101ms/sample
Epoch 81/100
1088/1088 - 110s - loss: 1.2052e-04 - acc: 1.0000 - val_loss: 2.1807 - val_acc: 0.6949 - lr: 3.1250e-05 - 110s/epoch - 101ms/sample
Epoch 82/100
1088/1088 - 110s - loss: 9.6121e-05 - acc: 1.0000 - val_loss: 2.1811 - val_acc: 0.6949 - lr: 3.1250e-05 - 110s/epoch - 101ms/sample
Epoch 83/100
1088/1088 - 110s - loss: 1.1534e-04 - acc: 1.0000 - val_loss: 2.1811 - val_acc: 0.6949 - lr: 3.1250e-05 - 110s/epoch - 101ms/sample
Epoch 84/100
1088/1088 - 110s - loss: 1.6216e-04 - acc: 1.0000 - val_loss: 2.1812 - val_acc: 0.6949 - lr: 3.1250e-05 - 110s/epoch - 101ms/sample
Epoch 85/100
1088/1088 - 109s - loss: 1.2675e-04 - acc: 1.0000 - val_loss: 2.1808 - val_acc: 0.6985 - lr: 3.1250e-05 - 109s/epoch - 101ms/sample
Epoch 86/100
Epoch 86: ReduceLROnPlateau reducing learning rate to 1.5625000742147677e-05.
1088/1088 - 111s - loss: 9.1777e-05 - acc: 1.0000 - val_loss: 2.1809 - val_acc: 0.6985 - lr: 3.1250e-05 - 111s/epoch - 102ms/sample
Epoch 87/100
1088/1088 - 110s - loss: 1.6483e-04 - acc: 1.0000 - val_loss: 2.1806 - val_acc: 0.6985 - lr: 1.5625e-05 - 110s/epoch - 101ms/sample
Epoch 88/100
1088/1088 - 110s - loss: 1.0369e-04 - acc: 1.0000 - val_loss: 2.1807 - val_acc: 0.6985 - lr: 1.5625e-05 - 110s/epoch - 101ms/sample
Epoch 89/100
1088/1088 - 111s - loss: 1.1584e-04 - acc: 1.0000 - val_loss: 2.1801 - val_acc: 0.6985 - lr: 1.5625e-05 - 111s/epoch - 102ms/sample
Epoch 90/100
1088/1088 - 112s - loss: 2.0659e-04 - acc: 1.0000 - val_loss: 2.1800 - val_acc: 0.6985 - lr: 1.5625e-05 - 112s/epoch - 103ms/sample
Epoch 91/100
1088/1088 - 112s - loss: 7.9664e-05 - acc: 1.0000 - val_loss: 2.1798 - val_acc: 0.6949 - lr: 1.5625e-05 - 112s/epoch - 103ms/sample
Epoch 92/100
Epoch 92: ReduceLROnPlateau reducing learning rate to 1e-05.
1088/1088 - 112s - loss: 1.1472e-04 - acc: 1.0000 - val_loss: 2.1798 - val_acc: 0.6949 - lr: 1.5625e-05 - 112s/epoch - 103ms/sample
Epoch 93/100
1088/1088 - 115s - loss: 1.0412e-04 - acc: 1.0000 - val_loss: 2.1793 - val_acc: 0.6985 - lr: 1.0000e-05 - 115s/epoch - 106ms/sample
Epoch 94/100
1088/1088 - 111s - loss: 9.9932e-05 - acc: 1.0000 - val_loss: 2.1794 - val_acc: 0.6949 - lr: 1.0000e-05 - 111s/epoch - 102ms/sample
Epoch 95/100
1088/1088 - 112s - loss: 9.0217e-05 - acc: 1.0000 - val_loss: 2.1797 - val_acc: 0.6949 - lr: 1.0000e-05 - 112s/epoch - 103ms/sample
Epoch 96/100
1088/1088 - 110s - loss: 1.3981e-04 - acc: 1.0000 - val_loss: 2.1796 - val_acc: 0.6949 - lr: 1.0000e-05 - 110s/epoch - 101ms/sample
Epoch 97/100
1088/1088 - 111s - loss: 9.9800e-05 - acc: 1.0000 - val_loss: 2.1794 - val_acc: 0.6949 - lr: 1.0000e-05 - 111s/epoch - 102ms/sample
Epoch 98/100
1088/1088 - 110s - loss: 8.0561e-05 - acc: 1.0000 - val_loss: 2.1795 - val_acc: 0.6949 - lr: 1.0000e-05 - 110s/epoch - 101ms/sample
Epoch 99/100
1088/1088 - 109s - loss: 9.6025e-05 - acc: 1.0000 - val_loss: 2.1794 - val_acc: 0.6949 - lr: 1.0000e-05 - 109s/epoch - 100ms/sample
Epoch 100/100
1088/1088 - 110s - loss: 5.7708e-05 - acc: 1.0000 - val_loss: 2.1790 - val_acc: 0.6949 - lr: 1.0000e-05 - 110s/epoch - 101ms/sample
oxf_cnn_model_result = oxf_cnn_model.evaluate(X_test, y_test)
print('Test loss: ', oxf_cnn_model_result[0])
print('Test accuracy: ', oxf_cnn_model_result[1])
Test loss:  2.1790304289144626
Test accuracy:  0.69485295
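Beyond the scalar accuracy, a per-class breakdown shows which of the 17 flower classes the model confuses. A minimal sketch using the classification_report imported at the top of the notebook, assuming y_test is one-hot encoded (as the categorical_crossentropy loss implies):
# Per-class precision/recall/F1 on the test set (a sketch; assumes one-hot y_test)
y_pred_probs = oxf_cnn_model.predict(X_test)
print(classification_report(np.argmax(y_test, axis=1), np.argmax(y_pred_probs, axis=1)))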
loss_train = oxf_cnn_model_fitted.history['loss']
loss_val = oxf_cnn_model_fitted.history['val_loss']
epochs = range(1,epoch+1)
plt.plot(epochs, loss_train, 'g', label='Training loss')
plt.plot(epochs, loss_val, 'b', label='Validation loss')
plt.title('Training and Validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
Acc_train = oxf_cnn_model_fitted.history['acc']
Acc_val = oxf_cnn_model_fitted.history['val_acc']
epochs = range(1,epoch+1)
plt.plot(epochs, Acc_train, 'g', label='Training accuracy')
plt.plot(epochs, Acc_val, 'b', label='Validation accuracy')
plt.title('Training and Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
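The curves make the overfitting explicit: training accuracy saturates at 100% within about 25 epochs while validation accuracy plateaus near 69% and validation loss stays above 2. A standard remedy worth trying (a hedged sketch only; it is not part of this run) is on-the-fly data augmentation with Keras' ImageDataGenerator, which enlarges the effective training set:
# Augmentation sketch (not used in this run): random rotations, shifts and flips
from keras.preprocessing.image import ImageDataGenerator
augmenter = ImageDataGenerator(rotation_range=20, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
# oxf_cnn_model.fit(augmenter.flow(X_train, y_train, batch_size=batch_size),
#                   validation_data=(X_test, y_test), epochs=epoch) would train on augmented batches.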
# Saving the model for future use
oxf_cnn_model.save('./oxf_cnn_model.h5')
oxf_cnn_model.save_weights('./oxf_cnn_model_weights.h5')
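Since load_model is already imported at the top of the notebook, the saved file can be reloaded later and checked against the in-memory model; a minimal sketch:
# Reload the saved model and confirm it reproduces the test metrics above (a sketch)
reloaded_model = load_model('./oxf_cnn_model.h5')
print(reloaded_model.evaluate(X_test, y_test))  # should match the loss/accuracy reported earlier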
# Let us first show the Prediction.jpg
plt.figure(figsize=(5,5))
tst_image = cv2.imread('./Prediction.jpg')
tst_image = cv2.resize(tst_image,(224,224))
# OpenCV loads images in BGR channel order; convert to RGB so plt.imshow shows true colors
test_image = cv2.cvtColor(tst_image, cv2.COLOR_BGR2RGB)
# Shape of the test image
print("Shape of the test image : {}".format(test_image.shape))
plt.imshow(test_image)
plt.axis('off')
plt.show()
Shape of the test image : (224, 224, 3)
import cv2
tst_image = cv2.imread('./Prediction.jpg')
# Resize the image to 224X224 shape to be compatible with the model
tst_image = cv2.resize(tst_image,(224,224))
test_image = cv2.cvtColor(tst_image, cv2.COLOR_BGR2RGB)
# Shape of the test image
print("Shape of the test image : {}".format(test_image.shape))
# Add a batch dimension so the shape matches the model's expected input
test_image = np.expand_dims(test_image, axis=0)
# Normalize pixel values to [0, 1]
test_image = test_image * 1/255.0
#Check the size of the Image
print("Shape of the test image after expand_dims : {}".format(test_image.shape))
#Predict the result of the test image
result = oxf_cnn_model.predict(test_image)
# Get the class dictionary
print("\nClass dictionary :{}".format(class_dict))
# Build an ordered list of class labels for reporting the prediction
# (here class_dict maps index -> index, so the labels themselves are numeric)
prediction_class = []
for class_name, index in class_dict.items():
    prediction_class.append(class_name)
#Showing probabilities against each class
print("\nFinal predicted probabilities for the test image :")
print([format(i, '.12f') for i in result[0]])
# Index of the class with maximum probability
predicted_index = np.argmax(result[0])
# Print the name of the class
print("\nFinal predicted class for the test image : {}".format(prediction_class[predicted_index]))
Shape of the test image : (224, 224, 3)
Shape of the test image after expand_dims : (1, 224, 224, 3)
Class dictionary :{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16}
Final predicted probabilities for the test image :
['0.000000000188', '0.000000000000', '0.999989032745', '0.000000000009', '0.000000000000', '0.000000000000', '0.000000000000', '0.000011020747', '0.000000000000', '0.000000000000', '0.000000000000', '0.000000000038', '0.000000000000', '0.000000000017', '0.000000000000', '0.000000000000', '0.000000000000']
Final predicted class for the test image : 2
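Because class_dict here maps indices to indices, the prediction is printed as the bare index 2 rather than a flower name. A hypothetical sketch of mapping the index back to a readable label, assuming the 17flowers directory holds one sub-folder per class and that the labels were encoded in sorted folder order (both assumptions, not confirmed by this notebook):
# Hypothetical: recover a readable class name from the predicted index.
# Assumes one sub-folder per class under 17flowers, label-encoded in sorted order.
flower_class_names = sorted(os.listdir('17flowers'))
print("Predicted flower class:", flower_class_names[predicted_index])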